Allow Xen page size != 16KB.
Decorrelate XSI size and page size.
Separate mapped_regs and vpd to save memory in non-VTi mode.
Signed-off-by: Tristan Gingold <tristan.gingold@bull.net>
int i;
cpuid3_t cpuid3;
vpd_t *vpd;
+ mapped_regs_t *mregs;
vpd = alloc_xenheap_pages(get_order(VPD_SIZE));
if (!vpd) {
printk("vpd base: 0x%p, vpd size:%ld\n", vpd, sizeof(vpd_t));
memset(vpd, 0, VPD_SIZE);
+ mregs = &vpd->vpd_low;
+
/* CPUID init */
for (i = 0; i < 5; i++)
- vpd->vcpuid[i] = ia64_get_cpuid(i);
+ mregs->vcpuid[i] = ia64_get_cpuid(i);
/* Limit the CPUID number to 5 */
- cpuid3.value = vpd->vcpuid[3];
+ cpuid3.value = mregs->vcpuid[3];
cpuid3.number = 4; /* 5 - 1 */
- vpd->vcpuid[3] = cpuid3.value;
+ mregs->vcpuid[3] = cpuid3.value;
- vpd->vac.a_from_int_cr = 1;
- vpd->vac.a_to_int_cr = 1;
- vpd->vac.a_from_psr = 1;
- vpd->vac.a_from_cpuid = 1;
- vpd->vac.a_cover = 1;
- vpd->vac.a_bsw = 1;
+ mregs->vac.a_from_int_cr = 1;
+ mregs->vac.a_to_int_cr = 1;
+ mregs->vac.a_from_psr = 1;
+ mregs->vac.a_from_cpuid = 1;
+ mregs->vac.a_cover = 1;
+ mregs->vac.a_bsw = 1;
- vpd->vdc.d_vmsw = 1;
+ mregs->vdc.d_vmsw = 1;
return vpd;
}
vmx_create_vp(struct vcpu *v)
{
u64 ret;
- vpd_t *vpd = v->arch.privregs;
+ vpd_t *vpd = (vpd_t *)v->arch.privregs;
u64 ivt_base;
extern char vmx_ia64_ivt;
/* ia64_ivt is a function pointer, so need this translation */
vpd = alloc_vpd();
ASSERT(vpd);
- v->arch.privregs = vpd;
- vpd->virt_env_vaddr = vm_buffer;
+ v->arch.privregs = (mapped_regs_t *)vpd;
+ vpd->vpd_low.virt_env_vaddr = vm_buffer;
/* Per-domain vTLB and vhpt implementation. Now vmx domain will stick
* to this solution. Maybe it can be deferred until we know created
vmx_relinquish_vcpu_resources(v);
else {
if (v->arch.privregs != NULL)
- free_xenheap_pages(v->arch.privregs, get_order(sizeof(mapped_regs_t)));
+ free_xenheap_pages(v->arch.privregs,
+ get_order_from_shift(XMAPPEDREGS_SHIFT));
}
free_xenheap_pages(v, KERNEL_STACK_SIZE_ORDER);
if (is_idle_domain(d))
return 0;
- if ((d->shared_info = (void *)alloc_xenheap_page()) == NULL)
+ d->shared_info = alloc_xenheap_pages(get_order_from_shift(XSI_SHIFT));
+ if (d->shared_info == NULL)
goto fail_nomem;
- memset(d->shared_info, 0, PAGE_SIZE);
- share_xen_page_with_guest(virt_to_page(d->shared_info),
- d, XENSHARE_writable);
+ memset(d->shared_info, 0, XSI_SIZE);
+ for (i = 0; i < XSI_SIZE; i += PAGE_SIZE)
+ share_xen_page_with_guest(virt_to_page((char *)d->shared_info + i),
+ d, XENSHARE_writable);
d->max_pages = (128UL*1024*1024)/PAGE_SIZE; // 128MB default // FIXME
/* We may also need emulation rid for region4, though it's unlikely
if (d->arch.mm.pgd != NULL)
pgd_free(d->arch.mm.pgd);
if (d->shared_info != NULL)
- free_xenheap_page(d->shared_info);
+ free_xenheap_pages(d->shared_info, get_order_from_shift(XSI_SHIFT));
return -ENOMEM;
}
{
BUG_ON(d->arch.mm.pgd != NULL);
if (d->shared_info != NULL)
- free_xenheap_page(d->shared_info);
+ free_xenheap_pages(d->shared_info, get_order_from_shift(XSI_SHIFT));
domain_flush_destroy (d);
shladd r24=r19,3,r24 // r24=&pte[pte_offset(addr)]
;;
(p7) ld8 r24=[r24] // r24=pte[pte_offset(addr)]
- mov r25=0x700|(_PAGE_SIZE_16K<<2) // key=7
+ mov r25=0x700|(PAGE_SHIFT<<2) // key=7
(p6) br.spnt.few frametable_fault
;;
mov cr.itir=r25
UINT64 val;
UINT64 itir, ifa;
-// FIXME: Handle faults appropriately for these
if (!iim || iim > HYPERPRIVOP_MAX) {
- panic_domain(regs, "bad hyperprivop ignored; iim=%lx, "
- "iip=0x%lx\n", iim, regs->cr_iip);
+ panic_domain(regs, "bad hyperprivop: iim=%lx, iip=0x%lx\n",
+ iim, regs->cr_iip);
return 1;
}
slow_hyperpriv_cnt[iim]++;
#endif
// Shared info
- mov r24=PAGE_SHIFT<<2
+ mov r24=XSI_SHIFT<<2
movl r25=__pgprot(__DIRTY_BITS | _PAGE_PL_2 | _PAGE_AR_RW)
;;
ptr.d in3,r24
// Map mapped_regs
mov r22=XMAPPEDREGS_OFS
- mov r24=PAGE_SHIFT<<2
+ mov r24=XMAPPEDREGS_SHIFT<<2
;;
add r22=r22,in3
;;
#include <linux/efi.h>
#include <asm/iosapic.h>
-/* Be sure the struct shared_info fits on a page because it is mapped in
- domain. */
-#if SHARED_INFO_SIZE > PAGE_SIZE
- #error "struct shared_info does not not fit in PAGE_SIZE"
+/* Be sure the struct shared_info size is <= XSI_SIZE. */
+#if SHARED_INFO_SIZE > XSI_SIZE
+#error "struct shared_info bigger than XSI_SIZE"
#endif
unsigned long xenheap_phys_end, total_pages;
return order;
}
+/* Return the page-allocation order for a region of (1 << shift) bytes.
+   Regions of one page or less (shift <= PAGE_SHIFT) need order 0;
+   larger regions need order (shift - PAGE_SHIFT). */
+static inline int get_order_from_shift(unsigned long shift)
+{
+ if (shift <= PAGE_SHIFT)
+ return 0;
+ else
+ return shift - PAGE_SHIFT;
+}
#endif
#undef __pa
unsigned long tmp[8]; // temp registers (e.g. for hyperprivops)
};
};
+};
+typedef struct mapped_regs mapped_regs_t;
+
+/* Full VPD (virtual processor descriptor), used only by VTi domains.
+   It starts with the architected mapped_regs block (vpd_low), so a
+   vpd_t pointer can be used where a mapped_regs_t is expected; the
+   trailing reserved/vmm_avail arrays presumably pad it to the size
+   required by the processor's VT-i VPD layout - TODO confirm against
+   the Itanium architecture manual. */
+struct vpd {
+ struct mapped_regs vpd_low;
unsigned long reserved6[3456];
unsigned long vmm_avail[128];
unsigned long reserved7[4096];
};
-typedef struct mapped_regs mapped_regs_t;
-typedef mapped_regs_t vpd_t;
+typedef struct vpd vpd_t;
struct arch_vcpu_info {
};
/* Address of shared_info in domain virtual space.
This is the default address, for compatibility only. */
-#define XSI_BASE 0xf100000000000000
+#define XSI_BASE 0xf100000000000000
/* Size of the shared_info area (this is not related to page size). */
-#define XSI_LOG_SIZE 14
-#define XSI_SIZE (1 << XSI_LOG_SIZE)
+#define XSI_SHIFT 14
+#define XSI_SIZE (1 << XSI_SHIFT)
/* Log size of the mapped_regs area (4KB, since XMAPPEDREGS_SHIFT is 12). */
-#define XMAPPEDREGS_LOG_SIZE 16
+#define XMAPPEDREGS_SHIFT 12
/* Offset of XASI (Xen arch shared info) wrt XSI_BASE. */
#define XMAPPEDREGS_OFS XSI_SIZE